va < gdt_descr->address + gdt_descr->size;
va += PAGE_SIZE, f++) {
frames[f] = virt_to_mfn(va);
- make_page_readonly((void *)va);
+ make_lowmem_page_readonly((void *)va);
}
if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
BUG();
#ifdef CONFIG_X86_PAE
pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_page_readonly(pmd_table);
+ make_lowmem_page_readonly(pmd_table);
set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
pud = pud_offset(pgd, 0);
if (pmd_table != pmd_offset(pud, 0))
{
if (pmd_none(*pmd)) {
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- make_page_readonly(page_table);
+ make_lowmem_page_readonly(page_table);
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
if (page_table != pte_offset_kernel(pmd, 0))
BUG();
{
pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
if (pte)
- make_page_readonly(pte);
+ make_lowmem_page_readonly(pte);
return pte;
}
spin_lock_irqsave(&pgd_lock, flags);
memcpy(pmd, copy_pmd, PAGE_SIZE);
spin_unlock_irqrestore(&pgd_lock, flags);
- make_page_readonly(pmd);
+ make_lowmem_page_readonly(pmd);
set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
}
if (PTRS_PER_PMD > 1) {
for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
- make_page_writable(pmd);
+ make_lowmem_page_writable(pmd);
kmem_cache_free(pmd_cache, pmd);
}
if (!HAVE_SHARED_KERNEL_PMD) {
pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
- make_page_writable(pmd);
+ make_lowmem_page_writable(pmd);
memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
kmem_cache_free(pmd_cache, pmd);
}
}
#ifndef CONFIG_XEN_SHADOW_MODE
+asmlinkage int xprintk(const char *fmt, ...);
void make_lowmem_page_readonly(void *va)
{
pte_t *pte = virt_to_ptep(va);
set_pte(pte, pte_wrprotect(*pte));
if ((unsigned long)va >= (unsigned long)high_memory) {
- unsigned long pfn;
- pfn = pte_pfn(*pte);
+ unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
if (pfn < highstart_pfn)
#endif
pte_t *pte = virt_to_ptep(va);
set_pte(pte, pte_mkwrite(*pte));
if ((unsigned long)va >= (unsigned long)high_memory) {
- unsigned long pfn;
- pfn = pte_pfn(*pte);
+ unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
if (pfn < highstart_pfn)
#endif
/* Check the new PTE. */
nl1e = l1e_from_intpte(val);
if ( unlikely(!get_page_from_l1e(nl1e, d)) )
+ {
+ MEM_LOG("ptwr_emulate: could not get_page_from_l1e()");
return X86EMUL_UNHANDLEABLE;
+ }
/* Checked successfully: do the update (write or cmpxchg). */
pl1e = map_domain_page(page_to_pfn(page));
goto emulate;
#endif
+ PTWR_PRINTK("ptwr_page_fault on l1 pt at va %lx, pfn %lx, eip %lx\n",
+ addr, pfn, (unsigned long)regs->eip);
+
/* Get the L2 index at which this L1 p.t. is always mapped. */
l2_idx = page->u.inuse.type_info & PGT_va_mask;
if ( unlikely(l2_idx >= PGT_va_unknown) )
goto emulate;
}
- PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08lx, "
- "pfn %lx\n", PTWR_PRINT_WHICH,
- addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
-
/*
* We only allow one ACTIVE and one INACTIVE p.t. to be updated at at
* time. If there is already one, we must flush it out.
goto emulate;
}
+ PTWR_PRINTK("[%c] batched ptwr_page_fault at va %lx, pt for %08lx, "
+ "pfn %lx\n", PTWR_PRINT_WHICH, addr,
+ l2_idx << L2_PAGETABLE_SHIFT, pfn);
+
d->arch.ptwr[which].l1va = addr | 1;
d->arch.ptwr[which].l2_idx = l2_idx;
d->arch.ptwr[which].vcpu = current;